/* Ensure real hardware interrupts are enabled. */
v->arch.guest_context.user_regs.eflags |= EF_IE;
} else {
- __vmwrite(GUEST_EFLAGS, v->arch.guest_context.user_regs.eflags);
+ __vmwrite(GUEST_RFLAGS, v->arch.guest_context.user_regs.eflags);
if (v->arch.guest_context.user_regs.eflags & EF_TF)
__vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
else
{
unsigned long current_eip;
- __vmread(GUEST_EIP, &current_eip);
- __vmwrite(GUEST_EIP, current_eip + inst_len);
+ __vmread(GUEST_RIP, &current_eip);
+ __vmwrite(GUEST_RIP, current_eip + inst_len);
}
#if VMX_DEBUG
{
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_VMMU,
"vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
va, eip, (unsigned long)regs->error_code);
#if 0
if ( !result )
{
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
printk("vmx pgfault to guest va=%p eip=%p\n", va, eip);
}
#endif
unsigned long eip, error_code;
unsigned long intr_fields;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
VMX_DBG_LOG(DBG_LEVEL_1,
unsigned int eax, ebx, ecx, edx;
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"do_cpuid: (eax) %lx, (ebx) %lx, (ecx) %lx, (edx) %lx,"
struct vcpu *v = current;
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
reg = exit_qualification & DEBUG_REG_ACCESS_NUM;
v->arch.guest_context.debugreg[reg] = *reg_p;
else {
unsigned long value;
- __vmread(GUEST_ESP, &value);
+ __vmread(GUEST_RSP, &value);
v->arch.guest_context.debugreg[reg] = value;
}
break;
if (reg != REG_ESP)
*reg_p = v->arch.guest_context.debugreg[reg];
else {
- __vmwrite(GUEST_ESP, v->arch.guest_context.debugreg[reg]);
+ __vmwrite(GUEST_RSP, v->arch.guest_context.debugreg[reg]);
}
break;
}
unsigned long eip;
struct vcpu *v = current;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_VMMU, "vmx_vmexit_do_invlpg: eip=%lx, va=%lx",
eip, va);
unsigned long eip, cs, eflags;
int vm86;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__vmread(GUEST_CS_SELECTOR, &cs);
- __vmread(GUEST_EFLAGS, &eflags);
+ __vmread(GUEST_RFLAGS, &eflags);
vm86 = eflags & X86_EFLAGS_VM ? 1 : 0;
VMX_DBG_LOG(DBG_LEVEL_1,
int error = 0;
error |= __vmread(INSTRUCTION_LEN, &inst_len);
- error |= __vmread(GUEST_EIP, &c->eip);
+ error |= __vmread(GUEST_RIP, &c->eip);
c->eip += inst_len; /* skip transition instruction */
- error |= __vmread(GUEST_ESP, &c->esp);
- error |= __vmread(GUEST_EFLAGS, &c->eflags);
+ error |= __vmread(GUEST_RSP, &c->esp);
+ error |= __vmread(GUEST_RFLAGS, &c->eflags);
error |= __vmread(CR0_READ_SHADOW, &c->cr0);
c->cr3 = d->arch.arch_vmx.cpu_cr3;
unsigned long mfn, old_cr4;
int error = 0;
- error |= __vmwrite(GUEST_EIP, c->eip);
- error |= __vmwrite(GUEST_ESP, c->esp);
- error |= __vmwrite(GUEST_EFLAGS, c->eflags);
+ error |= __vmwrite(GUEST_RIP, c->eip);
+ error |= __vmwrite(GUEST_RSP, c->esp);
+ error |= __vmwrite(GUEST_RFLAGS, c->eflags);
error |= __vmwrite(CR0_READ_SHADOW, c->cr0);
* a partition disables the CR0.PE bit.
*/
if ((value & X86_CR0_PE) == 0) {
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Disabling CR0.PE at %%eip 0x%lx\n", eip);
if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Transfering control to vmxassist %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
}
} else if (test_bit(VMX_CPU_STATE_ASSIST_ENABLED,
&d->arch.arch_vmx.cpu_state)) {
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Enabling CR0.PE at %%eip 0x%lx\n", eip);
if (vmx_assist(d, VMX_ASSIST_RESTORE)) {
clear_bit(VMX_CPU_STATE_ASSIST_ENABLED,
&d->arch.arch_vmx.cpu_state);
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1,
"Restoring to %%eip 0x%lx\n", eip);
return 0; /* do not update eip! */
CASE_GET_REG(ESI, esi);
CASE_GET_REG(EDI, edi);
case REG_ESP:
- __vmread(GUEST_ESP, &value);
+ __vmread(GUEST_RSP, &value);
break;
default:
printk("invalid gp: %d\n", gp);
CASE_SET_REG(ESI, esi);
CASE_SET_REG(EDI, edi);
case REG_ESP:
- __vmwrite(GUEST_ESP, value);
+ __vmwrite(GUEST_RSP, value);
regs->esp = value;
break;
default:
{
#if VMX_DEBUG
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
#endif
VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
raise_softirq(SCHEDULE_SOFTIRQ);
{
#if VMX_DEBUG
unsigned long eip;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
#endif
VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
raise_softirq(SCHEDULE_SOFTIRQ);
void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt)
{
__vmread(GUEST_SS_SELECTOR, &ctxt->ss);
- __vmread(GUEST_ESP, &ctxt->esp);
- __vmread(GUEST_EFLAGS, &ctxt->eflags);
+ __vmread(GUEST_RSP, &ctxt->esp);
+ __vmread(GUEST_RFLAGS, &ctxt->eflags);
__vmread(GUEST_CS_SELECTOR, &ctxt->cs);
- __vmread(GUEST_EIP, &ctxt->eip);
+ __vmread(GUEST_RIP, &ctxt->eip);
__vmread(GUEST_GS_SELECTOR, &ctxt->gs);
__vmread(GUEST_FS_SELECTOR, &ctxt->fs);
void save_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmread(GUEST_SS_SELECTOR, &regs->xss);
- __vmread(GUEST_ESP, &regs->esp);
- __vmread(GUEST_EFLAGS, &regs->eflags);
+ __vmread(GUEST_RSP, &regs->esp);
+ __vmread(GUEST_RFLAGS, &regs->eflags);
__vmread(GUEST_CS_SELECTOR, &regs->xcs);
- __vmread(GUEST_EIP, &regs->eip);
+ __vmread(GUEST_RIP, &regs->eip);
__vmread(GUEST_GS_SELECTOR, &regs->xgs);
__vmread(GUEST_FS_SELECTOR, &regs->xfs);
void restore_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmwrite(GUEST_SS_SELECTOR, regs->xss);
- __vmwrite(GUEST_ESP, regs->esp);
- __vmwrite(GUEST_EFLAGS, regs->eflags);
+ __vmwrite(GUEST_RSP, regs->esp);
+ __vmwrite(GUEST_RFLAGS, regs->eflags);
__vmwrite(GUEST_CS_SELECTOR, regs->xcs);
- __vmwrite(GUEST_EIP, regs->eip);
+ __vmwrite(GUEST_RIP, regs->eip);
__vmwrite(GUEST_GS_SELECTOR, regs->xgs);
__vmwrite(GUEST_FS_SELECTOR, regs->xfs);
return;
}
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
switch (exit_reason) {
}
case EXIT_REASON_VMCALL:
__get_instruction_length(inst_len);
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
vmx_print_line(regs.eax, v); /* provides the current domain */
break;
case EXIT_REASON_CR_ACCESS:
{
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__get_instruction_length(inst_len);
__vmread(EXIT_QUALIFICATION, &exit_qualification);
__update_guest_eip(inst_len);
break;
case EXIT_REASON_MSR_WRITE:
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_1, "MSR_WRITE: eip=%lx, eax=%lx, edx=%lx",
eip, (unsigned long)regs.eax, (unsigned long)regs.edx);
/* just ignore this point */
* Write the guest register value into VMCS
*/
__vmwrite(GUEST_SS_SELECTOR, regs->ss);
- __vmwrite(GUEST_ESP, regs->esp);
- __vmwrite(GUEST_EFLAGS, regs->eflags);
+ __vmwrite(GUEST_RSP, regs->esp);
+ __vmwrite(GUEST_RFLAGS, regs->eflags);
__vmwrite(GUEST_CS_SELECTOR, regs->cs);
- __vmwrite(GUEST_EIP, regs->eip);
+ __vmwrite(GUEST_RIP, regs->eip);
}
static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value)
return;
}
- __vmread(GUEST_EFLAGS, &eflags);
+ __vmread(GUEST_RFLAGS, &eflags);
if (irq_masked(eflags)) {
VMX_DBG_LOG(DBG_LEVEL_1, "guesting pending: %x, eflags: %lx",
highest_vector, eflags);
__vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
__vmwrite(HOST_CR3, pagetable_get_paddr(d->arch.monitor_table));
- __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
+ __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
if (event_pending(d)) {
vmx_check_events(d);
void store_cpu_user_regs(struct cpu_user_regs *regs)
{
__vmread(GUEST_SS_SELECTOR, &regs->ss);
- __vmread(GUEST_ESP, &regs->esp);
- __vmread(GUEST_EFLAGS, &regs->eflags);
+ __vmread(GUEST_RSP, &regs->esp);
+ __vmread(GUEST_RFLAGS, &regs->eflags);
__vmread(GUEST_CS_SELECTOR, &regs->cs);
__vmread(GUEST_DS_SELECTOR, &regs->ds);
__vmread(GUEST_ES_SELECTOR, &regs->es);
- __vmread(GUEST_EIP, &regs->eip);
+ __vmread(GUEST_RIP, &regs->eip);
}
static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs)
unsigned long eflags;
int index, vm86 = 0;
- __vmread(GUEST_EFLAGS, &eflags);
+ __vmread(GUEST_RFLAGS, &eflags);
if (eflags & X86_EFLAGS_VM)
vm86 = 1;
mpci_p = &current->arch.arch_vmx.vmx_platform.mpci;
inst_decoder_regs = mpci_p->inst_decoder_regs;
- __vmread(GUEST_EIP, &eip);
+ __vmread(GUEST_RIP, &eip);
__vmread(INSTRUCTION_LEN, &inst_len);
- __vmread(GUEST_EFLAGS, &eflags);
+ __vmread(GUEST_RFLAGS, &eflags);
vm86 = eflags & X86_EFLAGS_VM;
if (vm86) {
if (vmx_decode(check_prefix(inst, &mmio_inst), &mmio_inst) == DECODE_failure)
domain_crash_synchronous();
- __vmwrite(GUEST_EIP, eip + inst_len);
+ __vmwrite(GUEST_RIP, eip + inst_len);
store_cpu_user_regs(inst_decoder_regs);
// Only handle "mov" and "movs" instructions!
__vmwrite(GUEST_CR3, pagetable_get_paddr(v->arch.guest_table));
__vmwrite(HOST_CR3, pagetable_get_paddr(v->arch.monitor_table));
- __vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
+ __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
v->arch.schedule_tail = arch_vmx_do_resume;
}
error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base);
error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base);
- error |= __vmwrite(GUEST_ESP, regs->esp);
- error |= __vmwrite(GUEST_EIP, regs->eip);
+ error |= __vmwrite(GUEST_RSP, regs->esp);
+ error |= __vmwrite(GUEST_RIP, regs->eip);
eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */
eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */
- error |= __vmwrite(GUEST_EFLAGS, eflags);
+ error |= __vmwrite(GUEST_RFLAGS, eflags);
error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
__asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
error |= __vmwrite(GUEST_DR7, dr7);
- error |= __vmwrite(GUEST_VMCS0, 0xffffffff);
- error |= __vmwrite(GUEST_VMCS1, 0xffffffff);
+ error |= __vmwrite(VMCS_LINK_POINTER, 0xffffffff);
+ error |= __vmwrite(VMCS_LINK_POINTER_HIGH, 0xffffffff);
return error;
}
__asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : );
host_env->cr4 = crn;
error |= __vmwrite(HOST_CR4, crn);
- error |= __vmwrite(HOST_EIP, (unsigned long) vmx_asm_vmexit_handler);
+ error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
return error;
}
if ( VMX_DOMAIN(current) && (regs->eflags == 0) )
{
- __vmread(GUEST_EIP, &eip);
- __vmread(GUEST_ESP, &esp);
- __vmread(GUEST_EFLAGS, &eflags);
+ __vmread(GUEST_RIP, &eip);
+ __vmread(GUEST_RSP, &esp);
+ __vmread(GUEST_RFLAGS, &eflags);
__vmread(GUEST_SS_SELECTOR, &ss);
__vmread(GUEST_DS_SELECTOR, &ds);
__vmread(GUEST_ES_SELECTOR, &es);
* Need fill bits for SENTER
*/
-#define MONITOR_PIN_BASED_EXEC_CONTROLS 0x0000001f
-#define MONITOR_CPU_BASED_EXEC_CONTROLS 0x0581e7f2
-#define MONITOR_VM_EXIT_CONTROLS 0x0003edff
-#define MONITOR_VM_ENTRY_CONTROLS 0x000011ff
+#define MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE 0x00000016
+#define MONITOR_PIN_BASED_EXEC_CONTROLS \
+ MONITOR_PIN_BASED_EXEC_CONTROLS_RESERVED_VALUE | \
+ PIN_BASED_EXT_INTR_MASK | \
+ PIN_BASED_NMI_EXITING
+
+#define MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE 0x0401e172
+
+#define MONITOR_CPU_BASED_EXEC_CONTROLS \
+ MONITOR_CPU_BASED_EXEC_CONTROLS_RESERVED_VALUE | \
+ CPU_BASED_HLT_EXITING | \
+ CPU_BASED_INVDPG_EXITING | \
+ CPU_BASED_MWAIT_EXITING | \
+ CPU_BASED_MOV_DR_EXITING | \
+ CPU_BASED_UNCOND_IO_EXITING | \
+ CPU_BASED_CR8_LOAD_EXITING | \
+ CPU_BASED_CR8_STORE_EXITING
+
+#define MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE 0x0003edff
+
+#define VM_EXIT_CONTROLS_IA_32E_MODE 0x00000200
+
+#define MONITOR_VM_EXIT_CONTROLS \
+ MONITOR_VM_EXIT_CONTROLS_RESERVED_VALUE |\
+ VM_EXIT_ACK_INTR_ON_EXIT
+
+#define VM_ENTRY_CONTROLS_RESERVED_VALUE 0x000011ff
+#define VM_ENTRY_CONTROLS_IA_32E_MODE 0x00000200
+#define MONITOR_VM_ENTRY_CONTROLS VM_ENTRY_CONTROLS_RESERVED_VALUE
/*
* Exit Reasons
*/
#define TYPE_MOV_FROM_CR (1 << 4)
#define TYPE_CLTS (2 << 4)
#define TYPE_LMSW (3 << 4)
-#define CONTROL_REG_ACCESS_REG 0x700 /* 10:8, general purpose register */
+#define CONTROL_REG_ACCESS_REG 0xf00 /* 10:8, general purpose register */
+#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */
#define REG_EAX (0 << 8)
#define REG_ECX (1 << 8)
#define REG_EDX (2 << 8)
#define REG_EBP (5 << 8)
#define REG_ESI (6 << 8)
#define REG_EDI (7 << 8)
-#define LMSW_SOURCE_DATA (0xFFFF << 16) /* 16:31 lmsw source */
+#define REG_R8 (8 << 8)
+#define REG_R9 (9 << 8)
+#define REG_R10 (10 << 8)
+#define REG_R11 (11 << 8)
+#define REG_R12 (12 << 8)
+#define REG_R13 (13 << 8)
+#define REG_R14 (14 << 8)
+#define REG_R15 (15 << 8)
/*
* Exit Qualifications for MOV for Debug Register Access
#define DEBUG_REG_ACCESS_TYPE 0x10 /* 4, direction of access */
#define TYPE_MOV_TO_DR (0 << 4)
#define TYPE_MOV_FROM_DR (1 << 4)
-#define DEBUG_REG_ACCESS_REG 0x700 /* 11:8, general purpose register */
+#define DEBUG_REG_ACCESS_REG 0xf00 /* 11:8, general purpose register */
#define EXCEPTION_BITMAP_DE (1 << 0) /* Divide Error */
#define EXCEPTION_BITMAP_DB (1 << 1) /* Debug */
void vmx_enter_scheduler(void);
-#define VMX_CPU_STATE_ASSIST_ENABLED 1
+enum {
+ VMX_CPU_STATE_PG_ENABLED=0,
+ VMX_CPU_STATE_PAE_ENABLED,
+ VMX_CPU_STATE_LME_ENABLED,
+ VMX_CPU_STATE_LMA_ENABLED,
+ VMX_CPU_STATE_ASSIST_ENABLED,
+};
+
+#define VMX_LONG_GUEST(ed) \
+ (test_bit(VMX_CPU_STATE_LMA_ENABLED, &ed->arch.arch_vmx.cpu_state))
struct vmcs_struct {
u32 vmcs_revision_id;
- unsigned char data [0x1000 - sizeof (u32)];
+ unsigned char data [0]; /* vmcs size is read from MSR */
+};
+
+enum {
+ VMX_INDEX_MSR_LSTAR = 0,
+ VMX_INDEX_MSR_STAR,
+ VMX_INDEX_MSR_CSTAR,
+ VMX_INDEX_MSR_SYSCALL_MASK,
+ VMX_INDEX_MSR_EFER,
+
+ VMX_MSR_COUNT,
+};
+
+struct msr_state{
+ unsigned long flags;
+ unsigned long msr_items[VMX_MSR_COUNT];
+ unsigned long shadow_gs;
};
struct arch_vmx_struct {
unsigned long cpu_cr2; /* save CR2 */
unsigned long cpu_cr3;
unsigned long cpu_state;
+ struct msr_state msr_content;
struct virutal_platform_def vmx_platform;
};
#define VMCS_USE_HOST_ENV 1
#define VMCS_USE_SEPARATE_ENV 0
+/* This works for both 32-bit and 64-bit; eflags filtering is done in construct_init_vmcs_guest(). */
#define VMCS_EFLAGS_RESERVED_0 0xffc08028 /* bitmap for 0 */
#define VMCS_EFLAGS_RESERVED_1 0x00000002 /* bitmap for 1 */
extern int vmcs_version;
+#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
+#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
+#define CPU_BASED_HLT_EXITING 0x00000080
+#define CPU_BASED_INVDPG_EXITING 0x00000200
+#define CPU_BASED_MWAIT_EXITING 0x00000400
+#define CPU_BASED_RDPMC_EXITING 0x00000800
+#define CPU_BASED_RDTSC_EXITING 0x00001000
+#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
+#define CPU_BASED_CR8_STORE_EXITING 0x00100000
+#define CPU_BASED_TPR_SHADOW 0x00200000
+#define CPU_BASED_MOV_DR_EXITING 0x00800000
+#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
+#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000
+#define CPU_BASED_MONITOR_EXITING 0x20000000
+#define CPU_BASED_PAUSE_EXITING 0x40000000
+#define PIN_BASED_EXT_INTR_MASK 0x1
+#define PIN_BASED_NMI_EXITING 0x8
+
+#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
+#define VM_EXIT_HOST_ADD_SPACE_SIZE 0x00000200
+
+
/* VMCS Encodings */
enum vmcs_field {
GUEST_ES_SELECTOR = 0x00000800,
HOST_GS_SELECTOR = 0x00000c0a,
HOST_TR_SELECTOR = 0x00000c0c,
IO_BITMAP_A = 0x00002000,
+ IO_BITMAP_A_HIGH = 0x00002001,
IO_BITMAP_B = 0x00002002,
+ IO_BITMAP_B_HIGH = 0x00002003,
VM_EXIT_MSR_STORE_ADDR = 0x00002006,
+ VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007,
VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
+ VM_EXIT_MSR_LOAD_ADDR_HIGH = 0x00002009,
VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
+ VM_ENTRY_MSR_LOAD_ADDR_HIGH = 0x0000200b,
TSC_OFFSET = 0x00002010,
- GUEST_VMCS0 = 0x00002800,
- GUEST_VMCS1 = 0x00002801,
+ TSC_OFFSET_HIGH = 0x00002011,
+ VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
+ VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013,
+ VMCS_LINK_POINTER = 0x00002800,
+ VMCS_LINK_POINTER_HIGH = 0x00002801,
GUEST_IA32_DEBUGCTL = 0x00002802,
+ GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
EXCEPTION_BITMAP = 0x00004004,
VM_ENTRY_MSR_LOAD_COUNT = 0x00004014,
VM_ENTRY_INTR_INFO_FIELD = 0x00004016,
VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018,
+ VM_ENTRY_INSTRUCTION_LENGTH = 0x0000401a,
+ TPR_THRESHOLD = 0x0000401c,
+ SECONDARY_VM_EXEC_CONTROL = 0x0000401e,
+ VM_INSTRUCTION_ERROR = 0x00004400,
VM_EXIT_REASON = 0x00004402,
VM_EXIT_INTR_INFO = 0x00004404,
VM_EXIT_INTR_ERROR_CODE = 0x00004406,
IDT_VECTORING_INFO_FIELD = 0x00004408,
IDT_VECTORING_ERROR_CODE = 0x0000440a,
INSTRUCTION_LEN = 0x0000440c,
+ VMX_INSTRUCTION_INFO = 0x0000440e,
GUEST_ES_LIMIT = 0x00004800,
GUEST_CS_LIMIT = 0x00004802,
GUEST_SS_LIMIT = 0x00004804,
GUEST_LDTR_AR_BYTES = 0x00004820,
GUEST_TR_AR_BYTES = 0x00004822,
GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
+ GUEST_SYSENTER_CS = 0x0000482A,
+ HOST_IA32_SYSENTER_CS = 0x00004c00,
CR0_GUEST_HOST_MASK = 0x00006000,
CR4_GUEST_HOST_MASK = 0x00006002,
CR0_READ_SHADOW = 0x00006004,
CR4_READ_SHADOW = 0x00006006,
- CR3_TARGET_VALUES = 0x00006008,
- CR3_GUEST_HOST_MASK = 0x00006208,
+ CR3_TARGET_VALUE0 = 0x00006008,
+ CR3_TARGET_VALUE1 = 0x0000600a,
+ CR3_TARGET_VALUE2 = 0x0000600c,
+ CR3_TARGET_VALUE3 = 0x0000600e,
EXIT_QUALIFICATION = 0x00006400,
- GUEST_LINEAR_ADDRESS = 0x0000640A,
+ GUEST_LINEAR_ADDRESS = 0x0000640a,
GUEST_CR0 = 0x00006800,
GUEST_CR3 = 0x00006802,
GUEST_CR4 = 0x00006804,
GUEST_GDTR_BASE = 0x00006816,
GUEST_IDTR_BASE = 0x00006818,
GUEST_DR7 = 0x0000681a,
- GUEST_ESP = 0x0000681c,
- GUEST_EIP = 0x0000681e,
- GUEST_EFLAGS = 0x00006820,
+ GUEST_RSP = 0x0000681c,
+ GUEST_RIP = 0x0000681e,
+ GUEST_RFLAGS = 0x00006820,
GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
+ GUEST_SYSENTER_ESP = 0x00006824,
+ GUEST_SYSENTER_EIP = 0x00006826,
HOST_CR0 = 0x00006c00,
HOST_CR3 = 0x00006c02,
HOST_CR4 = 0x00006c04,
HOST_TR_BASE = 0x00006c0a,
HOST_GDTR_BASE = 0x00006c0c,
HOST_IDTR_BASE = 0x00006c0e,
- HOST_ESP = 0x00006c14,
- HOST_EIP = 0x00006c16,
+ HOST_IA32_SYSENTER_ESP = 0x00006c10,
+ HOST_IA32_SYSENTER_EIP = 0x00006c12,
+ HOST_RSP = 0x00006c14,
+ HOST_RIP = 0x00006c16,
};
#define VMX_DEBUG 1
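
For reference, a minimal sketch of the instruction-skip pattern the exit handlers above repeat (read GUEST_RIP, add the exit instruction length, write it back), using only the __vmread/__vmwrite accessors and VMCS field names that appear in this patch. The helper name is illustrative and not part of the patch.

/* Illustrative only: advance the guest instruction pointer past the
 * instruction that caused the VM exit, using the renamed
 * 64-bit-capable field names introduced above. */
static inline void skip_guest_instruction(void)
{
    unsigned long rip, inst_len;

    /* Length of the exiting instruction, reported by the CPU. */
    __vmread(INSTRUCTION_LEN, &inst_len);

    /* Read the current guest RIP and step over the instruction. */
    __vmread(GUEST_RIP, &rip);
    __vmwrite(GUEST_RIP, rip + inst_len);
}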